import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LinearRegression,Lasso,Ridge
from sklearn.ensemble import RandomForestRegressor,GradientBoostingRegressor
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import mean_squared_error,r2_score,median_absolute_error
# Neural Network
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras import losses
from tensorflow.keras.losses import MAE,MSE
from tensorflow.keras import optimizers
from tensorflow.keras.optimizers import SGD,Adam
• DOMAIN: Electronics and Telecommunication
• CONTEXT: A communications equipment manufacturing company has a product that emits informative signals. The company wants to build a machine learning model that can predict the equipment's signal quality from various parameters.
• DATA DESCRIPTION: The data set contains information on various signal tests performed: 1. Parameters: various measurable signal parameters. 2. Signal_Quality: final signal strength or quality.
• PROJECT OBJECTIVE: Build a regressor that uses these parameters to determine the signal strength or quality (as a number).
# importing dataset
data = pd.read_csv('Signal.csv')
data.head()
data.info()
data.describe().T
data.isnull().sum()
data['Signal_Strength'].unique()
plt.figure(figsize = (15,15))
for i in range(1,12):
    plt.subplot(3,4,i)
    sns.boxplot(x = data.columns[i],data = data)
plt.tight_layout()
plt.show()
plt.figure(figsize = (15,10))
for i in range(0,12):
    plt.subplot(3,4,i+1)
    col = data.columns[i]
    sns.kdeplot(x = col,data = data)
plt.tight_layout()
plt.show()
plt.figure(figsize = (15,15))
for i in range(0,11):
    plt.subplot(3,4,i+1)
    col = data.columns[i]
    sns.boxplot(y = col,x = 'Signal_Strength',data = data)
plt.tight_layout()
plt.show()
plt.figure(figsize = (15,15))
for i in range(0,12):
    plt.subplot(3,4,i+1)
    col = data.columns[i]
    if col != 'Signal_Strength':
        sns.kdeplot(x = col,hue = 'Signal_Strength',data = data)
plt.tight_layout()
plt.show()
plt.figure(figsize = (10,10))
sns.heatmap(data.corr(),annot = True,square = True,cbar = False)
plt.show()
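To read the heatmap more easily, a quick sketch (assuming, as above, that Signal_Strength is the target column) that ranks features by their correlation with the target:
# Rank features by absolute correlation with the target
corr_with_target = data.corr()['Signal_Strength'].drop('Signal_Strength')
print(corr_with_target.abs().sort_values(ascending=False))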
# pairplot manages its own figure, so no plt.figure() call is needed
sns.pairplot(data,hue = 'Signal_Strength')
plt.show()
# Data Preprocessing
scaler = MinMaxScaler()
scaled_data = pd.DataFrame(scaler.fit_transform(data),columns = data.columns)
X = scaled_data.iloc[:,:-1]
y = scaled_data.iloc[:,-1]
# Signal_Strength takes a small set of discrete values (see .unique() above),
# so stratifying on y keeps its distribution consistent across train and test
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,stratify=y,random_state=1)
X_train
y_train
# Building machine learning models
lr = LinearRegression()
lasso = Lasso()
ridge = Ridge()
svr = SVR()
knn = KNeighborsRegressor()
rf = RandomForestRegressor()
gb = GradientBoostingRegressor()
# LR
lr.fit(X_train,y_train)
lr_pred = lr.predict(X_test)
print("Linear regressiont train score = ",lr.score(X_train,y_train))
# lassso
lasso.fit(X_train,y_train)
lasso_pred = lasso.predict(X_test)
print("lasso train score = ",lasso.score(X_train,y_train))
#ridge
ridge.fit(X_train,y_train)
ridge_pred = ridge.predict(X_test)
print("ridge train score = ",ridge.score(X_train,y_train))
#SVM
svr.fit(X_train,y_train)
svr_pred = svr.predict(X_test)
print("svr train score = ",svr.score(X_train,y_train))
# knn
knn.fit(X_train,y_train)
knn_pred = knn.predict(X_test)
print("knn train score = ",knn.score(X_train,y_train))
#rf
rf.fit(X_train,y_train)
rf_pred = rf.predict(X_test)
print("random forest train score = ",rf.score(X_train,y_train))
# gb
gb.fit(X_train,y_train)
gb_pred = gb.predict(X_test)
print("graidentBoost train score = ",gb.score(X_train,y_train))
# test R^2 scores
print("Linear regression test score = ",lr.score(X_test,y_test))
print("lasso test score = ",lasso.score(X_test,y_test))
print("ridge test score = ",ridge.score(X_test,y_test))
print("svr test score = ",svr.score(X_test,y_test))
print("knn test score = ",knn.score(X_test,y_test))
print("random forest test score = ",rf.score(X_test,y_test))
print("GradientBoost test score = ",gb.score(X_test,y_test))
# test MSE (lower is better)
print("Linear regression test MSE = ",mean_squared_error(y_test,lr_pred))
print("lasso test MSE = ",mean_squared_error(y_test,lasso_pred))
print("ridge test MSE = ",mean_squared_error(y_test,ridge_pred))
print("svr test MSE = ",mean_squared_error(y_test,svr_pred))
print("knn test MSE = ",mean_squared_error(y_test,knn_pred))
print("random forest test MSE = ",mean_squared_error(y_test,rf_pred))
print("GradientBoost test MSE = ",mean_squared_error(y_test,gb_pred))
# Building the ANN
model_1 = Sequential([
Dense(100,activation = 'relu'),
Dense(50,activation = 'relu'),
Dense(10,activation = 'relu'),
Dense(1)
])
#2.compile
model_1.compile(loss = tf.keras.losses.mae,optimizer = tf.keras.optimizers.Adam(),metrics = ['mae'])
#model fitting
model_1.fit(X_train,y_train,epochs = 100)
#check the results
model_1.evaluate(X_test,y_test)
y_pred1 = model_1.predict(X_test)
# residual for the first test sample (prediction minus true value)
y_pred1[0][0] - y_test.iloc[0]
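The fixed 100-epoch budget above is a guess; a minimal sketch, assuming a 20% validation split is acceptable, of the same architecture trained with early stopping instead:
# Same architecture, but stop when validation MAE stops improving
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_mae', patience=10, restore_best_weights=True)
model_1b = Sequential([
    Dense(100,activation = 'relu'),
    Dense(50,activation = 'relu'),
    Dense(10,activation = 'relu'),
    Dense(1)
])
model_1b.compile(loss = 'mae',optimizer = Adam(),metrics = ['mae'])
model_1b.fit(X_train,y_train,validation_split = 0.2,epochs = 200,callbacks = [early_stop],verbose = 0)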
!pip install keras_tuner
import keras_tuner as kt
from keras_tuner import HyperModel
class ANNHyperModel(HyperModel):
    def build(self, hp):
        model = tf.keras.Sequential()
        # Tune the number of units in each Dense layer
        hp_units1 = hp.Int('units1', min_value=8, max_value=512, step=4)
        hp_units2 = hp.Int('units2', min_value=16, max_value=512, step=4)
        hp_units3 = hp.Int('units3', min_value=32, max_value=512, step=4)
        model.add(Dense(units=hp_units1, activation='relu'))
        model.add(Dense(units=hp_units2, activation='relu'))
        model.add(Dense(units=hp_units3, activation='relu'))
        model.add(Dense(1, kernel_initializer='normal', activation='linear'))
        # Tune the learning rate for the optimizer: 0.01, 0.001, or 0.0001
        hp_learning_rate = hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])
        model.compile(
            optimizer=tf.keras.optimizers.Adam(learning_rate=hp_learning_rate),
            loss='mse', metrics=['mse']
        )
        return model
hypermodel = ANNHyperModel()
tuner_h1 = kt.Hyperband(
    hypermodel,
    objective='mse',
    max_epochs=10,
    factor=3,
    project_name='keras_tuner_demo_hyperband'
)
class RegressionHyperModel(HyperModel):
    def build(self, hp):
        model = Sequential()
        # each layer gets its own hp name so the widths are tuned independently
        model.add(Dense(units=hp.Int('units1', 8, 512, 4, default=8), activation='relu'))
        model.add(Dense(units=hp.Int('units2', 16, 512, 4, default=16), activation='relu'))
        model.add(Dense(units=hp.Int('units3', 32, 512, 4, default=32), activation='relu'))
        model.add(Dense(1))
        model.compile(optimizer='adam', loss='mse', metrics=['mse'])
        return model
hypermodel = RegressionHyperModel()
tuner_rs1 = kt.RandomSearch(
hypermodel,
objective='mse',
seed=42,
max_trials=10,
executions_per_trial=2,
directory=os.path.normpath('D:/'),
project_name='keras_tuner_demo_random_search')
# Hyperparameter tuning using Keras Tuner (Hyperband)
tuner_h1.search(X_train, y_train, epochs=10)
best_model_h1 = tuner_h1.get_best_models(num_models=1)[0]
loss, mse = best_model_h1.evaluate(X_test, y_test)
best_model_h1.summary()
# Hyperparameter tuning using Keras Tuner (RandomSearch)
tuner_rs1.search(X_train, y_train, epochs=10)
best_model_rs1 = tuner_rs1.get_best_models(num_models=1)[0]
loss, mse = best_model_rs1.evaluate(X_test, y_test)
best_model_rs1.summary()
y_pred_rs1 = best_model_rs1.predict(X_test)
mean_squared_error(y_test,y_pred_rs1)
y_pred_h1 = best_model_h1.predict(X_test)
mean_squared_error(y_test,y_pred_h1)
# Let's handle outliers and see how the accuracy improves
from scipy.stats import zscore
z_data = zscore(data)
z_data
# replacing the outliers with the median
handle_X = data.iloc[:,:-1].copy()
for c in handle_X.columns:
    q1 = handle_X[c].quantile(.25)
    q3 = handle_X[c].quantile(.75)
    IQR = q3 - q1
    up = q3 + (1.5 * IQR)
    down = q1 - (1.5 * IQR)
    up_f = handle_X[c] >= up
    down_f = handle_X[c] <= down
    # replace values beyond either IQR fence with the column median
    handle_X.loc[up_f | down_f, c] = handle_X[c].median()
handle_X
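A quick sanity check, recomputing the fences on the cleaned frame (the quantiles shift slightly once the distribution changes, so small non-zero counts are possible):
# Count values still beyond the 1.5*IQR fences after replacement
for c in handle_X.columns:
    q1, q3 = handle_X[c].quantile(.25), handle_X[c].quantile(.75)
    iqr = q3 - q1
    n_out = ((handle_X[c] > q3 + 1.5*iqr) | (handle_X[c] < q1 - 1.5*iqr)).sum()
    print(c, n_out)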
plt.figure(figsize = (15,15))
for i in range(0,11):
    plt.subplot(3,4,i+1)
    sns.boxplot(x = handle_X.columns[i],data = handle_X)
plt.tight_layout()
plt.show()
X_train,X_test,y_train,y_test = train_test_split(MinMaxScaler().fit_transform(handle_X),y,test_size=0.2,stratify=y,random_state=1)
# Building machine learning models on the outlier-handled data
lr = LinearRegression()
lasso = Lasso()
ridge = Ridge()
svr = SVR()
knn = KNeighborsRegressor()
rf = RandomForestRegressor()
gb = GradientBoostingRegressor()
# LR
lr.fit(X_train,y_train)
lr_pred = lr.predict(X_test)
print("Linear regressiont train score = ",lr.score(X_train,y_train))
# lassso
lasso.fit(X_train,y_train)
lasso_pred = lasso.predict(X_test)
print("lasso train score = ",lasso.score(X_train,y_train))
#ridge
ridge.fit(X_train,y_train)
ridge_pred = ridge.predict(X_test)
print("ridge train score = ",ridge.score(X_train,y_train))
#SVM
svr.fit(X_train,y_train)
svr_pred = svr.predict(X_test)
print("svr train score = ",svr.score(X_train,y_train))
# knn
knn.fit(X_train,y_train)
knn_pred = knn.predict(X_test)
print("knn train score = ",knn.score(X_train,y_train))
#rf
rf.fit(X_train,y_train)
rf_pred = rf.predict(X_test)
print("random forest train score = ",rf.score(X_train,y_train))
# gb
gb.fit(X_train,y_train)
gb_pred = gb.predict(X_test)
print("graidentBoost train score = ",gb.score(X_train,y_train))
# test MSE after outlier handling (lower is better)
print("Linear regression test MSE = ",mean_squared_error(y_test,lr_pred))
print("lasso test MSE = ",mean_squared_error(y_test,lasso_pred))
print("ridge test MSE = ",mean_squared_error(y_test,ridge_pred))
print("svr test MSE = ",mean_squared_error(y_test,svr_pred))
print("knn test MSE = ",mean_squared_error(y_test,knn_pred))
print("random forest test MSE = ",mean_squared_error(y_test,rf_pred))
print("GradientBoost test MSE = ",mean_squared_error(y_test,gb_pred))
model_2 = Sequential([
Dense(100,activation = 'relu'),
Dense(50,activation = 'relu'),
Dense(10,activation = 'relu'),
Dense(1)
])
#2.compile
model_2.compile(loss = tf.keras.losses.mae,optimizer = tf.keras.optimizers.Adam(),metrics = ['mae'])
#model fitting
model_2.fit(X_train,y_train,epochs = 100)
#check the results
model_2.evaluate(X_test,y_test)
# Hyperparameter tuning using Keras Tuner (Hyperband)
hypermodel = ANNHyperModel()
tuner_h2 = kt.Hyperband(
    hypermodel,
    objective='mse',
    max_epochs=10,
    factor=3,
    project_name='keras_tuner_demo_h2'
)
tuner_h2.search(X_train, y_train, epochs=10)
best_model_h2 = tuner_h2.get_best_models(num_models=1)[0]
loss, mse = best_model_h2.evaluate(X_test, y_test)
best_model_h2.summary()
# random_search
hypermodel = RegressionHyperModel()
tuner_rs2 = kt.RandomSearch(
hypermodel,
objective='mse',
seed=42,
max_trials=10,
executions_per_trial=2,
directory=os.path.normpath('D:/'),
project_name='keras_tuner_demo__rs2')
tuner_rs2.search(X_train, y_train, epochs=10)
best_model_rs2 = tuner_rs2.get_best_models(num_models=1)[0]
loss, mse = best_model_rs2.evaluate(X_test, y_test)
best_model_rs2.summary()
y_pred_rs2 = best_model_rs2.predict(X_test)
mean_squared_error(y_test,y_pred_rs2)
# saving the better model of each experiment for future use
# 1. trained with outliers present
best_model_h1.save("best_model_with_outliers")
# 2. trained after outlier handling
best_model_rs2.save("best_model_without_outliers")
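To reuse a saved model later, a minimal loading sketch (same paths as above):
# Reload a saved model and sanity-check it on the held-out test set
reloaded = tf.keras.models.load_model("best_model_without_outliers")
print(reloaded.evaluate(X_test, y_test))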
• DOMAIN: Autonomous Vehicles
• BUSINESS CONTEXT: Recognising multi-digit numbers in photographs captured at street level is an important component of modern-day map making. A classic example of a corpus of such street-level photographs is Google's Street View imagery, composed of hundreds of millions of geo-located 360-degree panoramic images. The ability to automatically transcribe an address number from a geo-located patch of pixels and associate the transcribed number with a known street address helps pinpoint, with a high degree of accuracy, the location of the building it represents. More broadly, recognising numbers in photographs is a problem of interest to the optical character recognition community. While OCR on constrained domains like document processing is well studied, arbitrary multi-character text recognition in photographs is still highly challenging. This difficulty arises from the wide variability in the visual appearance of text in the wild: a large range of fonts, colours, styles, orientations, and character arrangements. The recognition problem is further complicated by environmental factors such as lighting, shadows, specularity, and occlusions, as well as by image acquisition factors such as resolution, motion, and focus blurs. In this project, we will use a dataset with images centred around a single digit (many of the images do contain some distractors at the sides). Although this sample of the data is simpler, it is still more complex than MNIST because of the distractors.
• DATA DESCRIPTION: SVHN is a real-world image dataset for developing machine learning and object recognition algorithms with minimal data formatting requirements, but it comes from a significantly harder, unsolved, real-world problem: recognising digits and numbers in natural scene images. SVHN is obtained from house numbers in Google Street View images; the label for each image is the prominent digit in that image. The dataset has been provided in the form of h5py files.
• PROJECT OBJECTIVE: Build a digit classifier on the SVHN (Street View House Numbers) dataset.
# Getting the packages
import h5py
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import BatchNormalization, Dropout
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix,accuracy_score
from sklearn.metrics import classification_report
from google.colab import drive
drive.mount('/content/drive')
# getting the dataset
data1=h5py.File("/content/drive/MyDrive/Part - 4 - Autonomous_Vehicles_SVHN_single_grey1.h5",'r')
#Load the training, testing, and validation data
X_train=data1['X_train']
X_test=data1['X_test']
X_val=data1['X_val']
y_train=data1['y_train']
y_test=data1['y_test']
y_val=data1['y_val']
X_train[0]
# Printing the shape and data type of training, testing, and validation data
print("Training data X -- Shape :", X_train.shape,"and Data Type : ", X_train.dtype)
print("Testing data X -- Shape :", X_test.shape,"and Data Type : ", X_test.dtype)
print("Validation data X -- Shape :", X_val.shape,"and Data Type : ", X_val.dtype)
print("Training data y -- Shape :", y_train.shape,"and Data Type : ", y_train.dtype)
print("Testing data y -- Shape :", y_test.shape,"and Data Type : ", y_test.dtype)
print("Validation data y -- Shape :", y_val.shape,"and Data Type : ", y_val.dtype)
fig=plt.figure(figsize=(8,8))
columns=10
rows=10
for i in range(1, columns*rows+1):
    img=X_test[i]
    fig.add_subplot(rows,columns,i)
    plt.imshow(img,cmap='gray')
plt.show()
# show the number in the dataset
plt.imshow(X_train[0],cmap='gray')
plt.show()
print('Label: ', y_train[0])
# show the number in the dataset
plt.imshow(X_test[0],cmap='gray')
plt.show()
print('Label: ', y_test[0])
# visualizing the first 10 images in the dataset and their labels
%matplotlib inline
plt.figure(figsize=(10, 1))
for i in range(10):
    plt.subplot(1, 10, i+1)
    plt.imshow(X_train[i].reshape(32,32),cmap='gray')
    plt.axis('off')
plt.show()
print('label for each of the above images: %s' % (y_train[0:10]))
#Reshape data from 2D to 1D -> 32x32 to 1024
X_train = np.asarray(X_train).reshape(X_train.shape[0],1024)
X_test = np.asarray(X_test).reshape(X_test.shape[0],1024)
X_val = np.asarray(X_val).reshape(X_val.shape[0],1024)
# scale pixel values to [0, 1]
X_train = X_train/255
X_test = X_test/255
X_val = X_val/255
# Initializing the values of k and finding the accuracies on validation data
k_vals = range(1, 13, 2)
accuracies = []
for k in k_vals:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)
    score = knn.score(X_val, y_val)
    print("k value=%d, accuracy score=%.2f%%" % (k, score * 100))
    accuracies.append(score)
# finding the value of k which has the largest accuracy
i = int(np.argmax(accuracies))
print("k=%d value has highest accuracy of %.2f%% on validation data" % (k_vals[i],accuracies[i] * 100))
# Retraining the model using the best k value and predict the labels on test data
knn = KNeighborsClassifier(n_neighbors=k_vals[i])
knn.fit(X_train, y_train)
predictions = knn.predict(X_test)
# show a final classification report demonstrating the accuracy of the classifier
print("EVALUATION ON TESTING DATA")
print(confusion_matrix(y_test,predictions))
print(classification_report(y_test, predictions))
# show one test image and the label the classifier predicts for it
plt.figure(figsize=(2,2))
plt.imshow(X_test[59].reshape(32,32))
plt.show()
image = X_test[59]
print(knn.predict(image.reshape(1, -1)))
# X_train, X_test, and X_val were already flattened to 1024 features and scaled above
y_train = tf.keras.utils.to_categorical(y_train, num_classes=10)
y_test = tf.keras.utils.to_categorical(y_test, num_classes=10)
y_val = tf.keras.utils.to_categorical(y_val, num_classes=10)
print(X_train.shape, X_test.shape,X_val.shape, y_train.shape, y_test.shape,y_val.shape)
y_train = y_train.astype('int')
y_test = y_test.astype('int')
y_val = y_val.astype('int')
X_train[0]  # pixel values were already scaled to [0, 1] above
np.isnan(X_train).sum()
np.isnan(y_train).sum()
np.isnan(X_val).sum()
np.isnan(y_val).sum()
##Initialize the Artificial Neural Network Classifier
keras_model = Sequential()
# Input layer with ReLU activation
keras_model.add(Dense(512,activation='relu',input_shape=(1024,)))
# Hidden layers 1-4, tapering from 256 down to 32 units
keras_model.add(Dense(256,activation='relu'))
keras_model.add(Dense(128,activation='relu'))
keras_model.add(Dense(64,activation='relu'))
keras_model.add(Dense(32,activation='relu'))
# Output layer of 10 nodes (digits 0-9); softmax because this is multiclass classification
keras_model.add(Dense(10,activation='softmax'))
# compiling the ANN classifier
keras_model.compile(loss=tf.keras.losses.CategoricalCrossentropy(),optimizer = optimizers.Adam(),metrics = ['accuracy'])
# Fitting the ANN to the training data
# (X_train and X_val were already scaled to [0, 1] above, so no further /255 is needed)
history = keras_model.fit(X_train, y_train, validation_data=(X_val,y_val),batch_size = 128, epochs = 10, verbose = 1)
keras_model.summary()
# recompiling the same classifier; training below continues from the current weights
keras_model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
# Fitting the ANN to the training data
history = keras_model.fit(X_train, y_train, validation_data=(X_val,y_val),batch_size = 200, epochs = 10, verbose = 1)
Since we are using a small learning rate, the model learns slowly, so we need a larger number of epochs.
#Setting the learning rate in SGD
sgd = optimizers.SGD(learning_rate = 0.0001)
# compiling the ANN classifier
keras_model.compile(optimizer = sgd, loss = 'categorical_crossentropy', metrics = ['accuracy'])
# Fitting the ANN to the Training data
history = keras_model.fit(X_train, y_train, validation_data=(X_val,y_val),batch_size = 200, epochs = 100, verbose = 1)
Since we are using a small learning rate, the model learns slowly, so we need a larger number of epochs.
#Setting the learning rate in Adam
adam = optimizers.Adam(learning_rate = 0.0001)
# compiling the ANN classifier
keras_model.compile(optimizer = adam, loss = 'categorical_crossentropy', metrics = ['accuracy'])
# Fitting the ANN to the Training data
history = keras_model.fit(X_train, y_train, validation_data=(X_val,y_val),batch_size = 200, epochs = 100, verbose = 1)
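The 100-epoch budget above is hand-picked; a minimal sketch, assuming a patience of 10 epochs is acceptable, that clones the architecture and lets early stopping choose when to halt instead:
# Fresh copy of the architecture so the model above is left untouched
model_es = tf.keras.models.clone_model(keras_model)
model_es.compile(optimizer = optimizers.Adam(learning_rate = 0.0001), loss = 'categorical_crossentropy', metrics = ['accuracy'])
# Stop when validation accuracy plateaus rather than fixing the epoch count
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=10, restore_best_weights=True)
history_es = model_es.fit(X_train, y_train, validation_data=(X_val,y_val), batch_size = 200, epochs = 500, callbacks = [early_stop], verbose = 1)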
results = keras_model.evaluate(X_val, y_val)
print('Val_acc using simple NN adam : ', results[1])
#Store the accuracy results for each model in a dataframe for final comparison
results_on_val = pd.DataFrame({'Method':['NN'], 'accuracy': [results[1]]},index=['1'])
results_on_val = results_on_val[['Method', 'accuracy']]
results_on_val
##Initialize the Artificial Neural Network Classifier
keras_model_1 = Sequential()
# Input Layer
#Adding Input layer and activation functions ReLU
keras_model_1.add(Dense(512, kernel_initializer='he_normal',input_shape = (1024, )))
#Adding BatchNormalization Layer
keras_model_1.add(BatchNormalization())
#Adding Activation function
keras_model_1.add(Activation('relu'))
#Hidden Layer 1
#Adding first Hidden layer
keras_model_1.add(Dense(256, kernel_initializer='he_normal'))
#Adding BatchNormalization Layer
keras_model_1.add(BatchNormalization())
#Adding Activation function
keras_model_1.add(Activation('relu'))
#Hidden Layer 2
#Adding second Hidden layer
keras_model_1.add(Dense(128, kernel_initializer='he_normal'))
#Adding BatchNormalization Layer
keras_model_1.add(BatchNormalization())
#Adding Activation function
keras_model_1.add(Activation('relu'))
#Hidden Layer 3
#Adding third Hidden layer
keras_model_1.add(Dense(64, kernel_initializer='he_normal'))
#Adding BatchNormalization Layer
keras_model_1.add(BatchNormalization())
#Adding Activation function
keras_model_1.add(Activation('relu'))
#Hidden Layer 4
#Adding fourth Hidden layer
keras_model_1.add(Dense(32, kernel_initializer='he_normal'))
#Adding BatchNormalization Layer
keras_model_1.add(BatchNormalization())
#Adding Activation function
keras_model_1.add(Activation('relu'))
# Output Layer
#Adding output layer which is of 10 nodes (digits)
keras_model_1.add(Dense(10))
#Adding Activation function
# Here, we are using the softmax function because this is a multiclass classification problem
keras_model_1.add(Activation('softmax'))
keras_model_1.summary()
#Setting the learning rate in Adam
adam = optimizers.Adam(learning_rate = 0.0001)
# compiling the ANN classifier
keras_model_1.compile(optimizer = adam, loss = 'categorical_crossentropy', metrics = ['accuracy'])
# Fitting the ANN to the Training data
history_1 = keras_model_1.fit(X_train, y_train, validation_data=(X_val,y_val),batch_size = 200, epochs = 100, verbose = 1)
results_1 = keras_model_1.evaluate(X_val, y_val)
print('Val_acc using Batch ANN adam : ', results_1[1])
#Store the accuracy results for each model in a dataframe for final comparison
tempResultsDf = pd.DataFrame({'Method':['NN_Batch_adam'], 'accuracy': [results_1[1]]},index=['2'])
results_on_val= pd.concat([results_on_val, tempResultsDf])
results_on_val = results_on_val[['Method', 'accuracy']]
results_on_val
#Initialize the Artificial Neural Network Classifier
keras_model_2 = Sequential()
# Input Layer
#Adding Input layer and activation functions ReLU
keras_model_2.add(Dense(512, kernel_initializer='he_normal',input_shape = (1024, )))
#Adding BatchNormalization Layer
keras_model_2.add(BatchNormalization())
#Adding Activation function
keras_model_2.add(Activation('relu'))
#Hidden Layer 1
#Adding first Hidden layer
keras_model_2.add(Dense(256, kernel_initializer='he_normal'))
#Adding BatchNormalization Layer
keras_model_2.add(BatchNormalization())
#Adding Activation function
keras_model_2.add(Activation('relu'))
#Hidden Layer 2
#Adding second Hidden layer
keras_model_2.add(Dense(128, kernel_initializer='he_normal'))
#Adding BatchNormalization Layer
keras_model_2.add(BatchNormalization())
#Adding Activation function
keras_model_2.add(Activation('relu'))
#Hidden Layer 3
#Adding third Hidden layer
keras_model_2.add(Dense(64, kernel_initializer='he_normal'))
#Adding BatchNormalization Layer
keras_model_2.add(BatchNormalization())
#Adding Activation function
keras_model_2.add(Activation('relu'))
#Hidden Layer 4
#Adding fourth Hidden layer
keras_model_2.add(Dense(32, kernel_initializer='he_normal'))
#Adding BatchNormalization Layer
keras_model_2.add(BatchNormalization())
#Adding Activation function
keras_model_2.add(Activation('relu'))
# Output Layer
#Adding output layer which is of 10 nodes (digits)
keras_model_2.add(Dense(10))
#Adding Activation function
# Here, we are using the softmax function because this is a multiclass classification problem
keras_model_2.add(Activation('softmax'))
keras_model_2.summary()
#Setting the learning rate in sgd
sgd = optimizers.SGD(learning_rate = 0.0001)
# compiling the ANN classifier
keras_model_2.compile(optimizer = sgd, loss = 'categorical_crossentropy', metrics = ['accuracy'])
# Fitting the ANN to the Training data
history_2 = keras_model_2.fit(X_train, y_train, validation_data=(X_val,y_val),batch_size = 200, epochs = 10, verbose = 1)
results_2 = keras_model_2.evaluate(X_val, y_val)
print('Val_acc using Batch ANN sgd : ', results_2[1])
#Store the accuracy results for each model in a dataframe for final comparison
tempResultsDf = pd.DataFrame({'Method':['NN_Batch_sgd'], 'accuracy': [results_2[1]]},index=['3'])
results_on_val= pd.concat([results_on_val, tempResultsDf])
results_on_val = results_on_val[['Method', 'accuracy']]
results_on_val
##Initialize the Artificial Neural Network Classifier
keras_model_3 = Sequential()
# Input Layer
#Adding Input layer and activation functions ReLU
keras_model_3.add(Dense(512, kernel_initializer='he_normal',input_shape = (1024, )))
#Adding BatchNormalization Layer
keras_model_3.add(BatchNormalization())
#Adding Activation function
keras_model_3.add(Activation('relu'))
#Adding Dropout Layer
keras_model_3.add(Dropout(0.5))
#Hidden Layer 1
#Adding first Hidden layer
keras_model_3.add(Dense(256,kernel_initializer='he_normal'))
#Adding BatchNormalization Layer
keras_model_3.add(BatchNormalization())
#Adding Activation function
keras_model_3.add(Activation('relu'))
#Adding Dropout Layer
keras_model_3.add(Dropout(0.5))
#Hidden Layer 2
#Adding second Hidden layer
keras_model_3.add(Dense(128, kernel_initializer='he_normal'))
#Adding BatchNormalization Layer
keras_model_3.add(BatchNormalization())
#Adding Activation function
keras_model_3.add(Activation('relu'))
#Adding Dropout Layer
keras_model_3.add(Dropout(0.5))
#Hidden Layer 3
#Adding third Hidden layer
keras_model_3.add(Dense(64, kernel_initializer='he_normal'))
#Adding BatchNormalization Layer
keras_model_3.add(BatchNormalization())
#Adding Activation function
keras_model_3.add(Activation('relu'))
#Adding Dropout Layer
keras_model_3.add(Dropout(0.5))
#Hidden Layer 4
#Adding fourth Hidden layer
keras_model_3.add(Dense(32, kernel_initializer='he_normal'))
#Adding BatchNormalization Layer
keras_model_3.add(BatchNormalization())
#Adding Activation function
keras_model_3.add(Activation('relu'))
#Adding Dropout Layer
keras_model_3.add(Dropout(0.5))
# Output Layer
#Adding output layer which is of 10 nodes (digits)
keras_model_3.add(Dense(10))
#Adding Activation function
# Here, we are using the softmax function because this is a multiclass classification problem
keras_model_3.add(Activation('softmax'))
keras_model_3.summary()
#Setting the learning rate in adam
adam = optimizers.Adam(learning_rate = 0.0001)
# compiling the ANN classifier
keras_model_3.compile(optimizer = adam, loss = 'categorical_crossentropy', metrics = ['accuracy'])
# Fitting the ANN to the Training data
history_3 = keras_model_3.fit(X_train, y_train, validation_data=(X_val,y_val),batch_size = 200, epochs = 100, verbose = 1)
results_3 = keras_model_3.evaluate(X_val, y_val)
print('Val_acc using BatchNorm and Dropout adam : ', results_3[1])
#Store the accuracy results for each model in a dataframe for final comparison
tempResultsDf = pd.DataFrame({'Method':['NN_Batch_Drop_adam'], 'accuracy': [results_3[1]]},index=['4'])
results_on_val= pd.concat([results_on_val, tempResultsDf])
results_on_val = results_on_val[['Method', 'accuracy']]
results_on_val
##Initialize the Artificial Neural Network Classifier
keras_model_final = Sequential()
# Input Layer
#Adding Input layer and activation functions ReLU
keras_model_final.add(Dense(512, kernel_initializer='he_normal',input_shape = (1024, )))
#Adding BatchNormalization Layer
keras_model_final.add(BatchNormalization())
#Adding Activation function
keras_model_final.add(Activation('relu'))
#Hidden Layer 1
#Adding first Hidden layer
keras_model_final.add(Dense(256, kernel_initializer='he_normal'))
#Adding BatchNormalization Layer
keras_model_final.add(BatchNormalization())
#Adding Activation function
keras_model_final.add(Activation('relu'))
#Hidden Layer 2
#Adding second Hidden layer
keras_model_final.add(Dense(128, kernel_initializer='he_normal'))
#Adding BatchNormalization Layer
keras_model_final.add(BatchNormalization())
#Adding Activation function
keras_model_final.add(Activation('relu'))
#Hidden Layer 3
#Adding third Hidden layer
keras_model_final.add(Dense(64, kernel_initializer='he_normal'))
#Adding BatchNormalization Layer
keras_model_final.add(BatchNormalization())
#Adding Activation function
keras_model_final.add(Activation('relu'))
#Hidden Layer 4
#Adding fourth Hidden layer
keras_model_final.add(Dense(32, kernel_initializer='he_normal'))
#Adding BatchNormalization Layer
keras_model_final.add(BatchNormalization())
#Adding Activation function
keras_model_final.add(Activation('relu'))
# Output Layer
#Adding output layer which is of 10 nodes (digits)
keras_model_final.add(Dense(10))
#Adding Activation function
# Here, we are using the softmax function because this is a multiclass classification problem
keras_model_final.add(Activation('softmax'))
keras_model_final.summary()
#Setting the learning rate in Adam
adam = optimizers.Adam(learning_rate = 0.0001)
# compiling the ANN classifier
keras_model_final.compile(optimizer = adam, loss = 'categorical_crossentropy', metrics = ['accuracy'])
# Fitting the final ANN to the training data
# (the test set serves as validation data here to monitor the final run)
history_final = keras_model_final.fit(X_train, y_train, validation_data=(X_test,y_test),batch_size = 200, epochs = 100, verbose = 1)
results_final = keras_model_final.evaluate(X_test, y_test)
print('Test accuracy : ', results_final[1])
print(history_final.history.keys())
# summarize history for accuracy
plt.plot(history_final.history['accuracy'])
plt.plot(history_final.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history_final.history['loss'])
plt.plot(history_final.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
Image 1
#Showing the image
plt.imshow(X_test[5].reshape(32,32),cmap='gray')
#Predicting the digit (argmax over the softmax output; predict_classes is deprecated)
classes_x = np.argmax(keras_model_final.predict(X_test)[5])
classes_x
Image 2
#Showing the image
plt.imshow(X_test[10].reshape(32,32),cmap='gray')
#Predicting the digit
classes_x = np.argmax(keras_model_final.predict(X_test)[10])
classes_x
Image 3
#Showing the image
plt.imshow(X_test[112].reshape(32,32),cmap='gray')
#Predicting the digit
classes_x = np.argmax(keras_model_final.predict(X_test)[112])
classes_x
#Predicting for all images
y_pred = np.argmax(keras_model_final.predict(X_test),axis=1)
print(y_pred)
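To summarise the per-image checks with one number, a short sketch (y_test was one-hot encoded above, so it is converted back to class indices first):
# Overall test accuracy from the argmax predictions
y_true = np.argmax(y_test, axis=1)   # undo the one-hot encoding
print('Test accuracy:', accuracy_score(y_true, y_pred))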
results_on_val